Load the datasets into memory

There are better ways to do this, but since the datasets are small enough to fit comfortably in RAM, we can keep it simple.
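
A back-of-the-envelope estimate supports this. A minimal sketch, assuming (purely for illustration) 10,000 images at the 128x128 float16 segment size defined below:

# Each preprocessed segment is a 128x128 float16 array
bytes_per_image = 128 * 128 * 2           # 32 KiB per image
print(10000 * bytes_per_image / 2 ** 20)  # ~312 MiB for a hypothetical 10,000 images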


In [1]:
from __future__ import absolute_import, division, print_function

MEAN_PIXEL_VALUE = 128
PIXEL_STANDARD_DEVIATION = 80
SEGMENT_SIZE = 128

In [2]:
import numpy as np

def preprocess_image(image):
    """Converts the given image to grayscale, subtracts the mean pixel value, and divides by the standard deviation*.
    
    The returned np.array uses 16-bit floats to conserve memory.
    """
    
    global MEAN_PIXEL_VALUE
    global PIXEL_STANDARD_DEVIATION
    
    image = image.convert('L')
    image = np.array(image).astype(np.float16)
    image = image - MEAN_PIXEL_VALUE
    image = image / PIXEL_STANDARD_DEVIATION
    
    return image
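
As a quick sanity check (not part of the original flow), a constant image at the mean pixel value should normalize to all zeros:

import numpy as np
from PIL import Image

# A uniform mid-gray image: (128 - MEAN_PIXEL_VALUE) / PIXEL_STANDARD_DEVIATION == 0
test_image = Image.fromarray(np.full((SEGMENT_SIZE, SEGMENT_SIZE), MEAN_PIXEL_VALUE, dtype=np.uint8))
assert np.allclose(preprocess_image(test_image), 0.0)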

In [3]:
from os import walk
from os.path import join
from PIL import Image

def load_images(dir_path, n_max=-1):
    """Loads the images from dir_path into RAM.
    
    A cutoff can be specified as the second argument.
    
    Returns the loaded images.
    """
    images = []
    
    for filename in next(walk(dir_path))[2]:
        if len(images) == n_max:
            break
        
        images.append(preprocess_image(Image.open(join(dir_path, filename))))
        
    return images

In [4]:
training_images = []
training_labels = []

training_images.extend(load_images('../data/train/no-text/'))
n_no_text = len(training_images)
training_labels.extend([[1, 0] for i in range(n_no_text)])

training_images.extend(load_images('../data/train/text/'))
n_text = len(training_images) - n_no_text
training_labels.extend([[0, 1] for i in range(n_text)])

training_images = np.array(training_images)
training_labels = np.array(training_labels)

In [5]:
validation_images = []
validation_labels = []

validation_images.extend(load_images('../data/valid/no-text/'))
n_no_text = len(validation_images)
validation_labels.extend([[1, 0] for i in range(n_no_text)])

validation_images.extend(load_images('../data/valid/text/'))
n_text = len(validation_images) - n_no_text
validation_labels.extend([[0, 1] for i in range(n_text)])

validation_images = np.array(validation_images)
validation_labels = np.array(validation_labels)

Train the model


In [6]:
def get_batch(batch_size, use_training_set=True):
    """Selects a random batch from either the training or the validation set.
    
    Returns the loaded images and their labels.
    """
    
    if use_training_set:
        indexes = np.random.choice(np.arange(len(training_images)), batch_size, replace=False)
        images = training_images[indexes]
        labels = training_labels[indexes]
    else:
        indexes = np.random.choice(np.arange(len(validation_images)), batch_size, replace=False)
        images = validation_images[indexes]
        labels = validation_labels[indexes]
        
    return images, labels
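
Note that the sampling is without replacement, so batch_size must not exceed the size of the chosen set. An illustrative call (shapes assume the 128x128 segments loaded above):

images, labels = get_batch(4, use_training_set=False)
print(images.shape, labels.shape)  # (4, 128, 128) and (4, 2)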

In [7]:
from matplotlib import pyplot as plt
%matplotlib inline

def fill_feed_dict(pl_images, pl_labels, pl_learning_rate, pl_keep_prob, batch_size, learning_rate, keep_prob):
    """Creates a feed_dict, which is used by TensorFlow to train/validate the ConvNet.
    
    The feed_dict contains the data, the labels, and the hyperparameters that change during training:
        * The learning rate (learning_rate)
        * The probability that the dropout layer(s) keep a given value (keep_prob)
    
    Returns the feed_dict.
    """
    
    global SEGMENT_SIZE
    
    training = learning_rate != 0  # A learning rate of 0 signals a validation pass
    images, labels = get_batch(batch_size, training)
    
    images = np.reshape(images, (batch_size, SEGMENT_SIZE, SEGMENT_SIZE, 1))
    labels = np.reshape(labels, (batch_size, 2))
    
    feed_dict = {
        pl_images: images,
        pl_labels: labels,
        pl_learning_rate: learning_rate,
        #pl_keep_prob: keep_prob # Disabled so that the graph can run on Android
    }
    
    return feed_dict


# Smoke test: string keys stand in for the TF placeholders so the batch can be
# inspected (and plotted below) without building a graph or running a session.
test_feed_dict = fill_feed_dict('images', 'labels', 'learning_rate', 'keep_prob', 2, 0.01, 1.0)

for i in range(len(test_feed_dict['images'])):
    plt.figure()
    plt.title(str(test_feed_dict['labels'][i]))
    plt.imshow(test_feed_dict['images'][i].squeeze(), cmap=plt.cm.gray)



In [8]:
import tensorflow as tf

WEIGHT_PENALTY_RATE = 3e-3


def weight_variable(shape, stddev):
    global WEIGHT_PENALTY_RATE
    
    initial = tf.truncated_normal(shape, stddev=stddev)
    weights = tf.Variable(initial, name='weights')
    
    tf.add_to_collection('losses', tf.mul(tf.nn.l2_loss(weights), WEIGHT_PENALTY_RATE))
    
    return weights

def bias_variable(shape, init):
    global WEIGHT_PENALTY_RATE
    
    initial = tf.constant(init, shape=shape)
    biases = tf.Variable(initial, name='biases')
    
    tf.add_to_collection('losses', tf.mul(tf.nn.l2_loss(biases), WEIGHT_PENALTY_RATE))
    
    return biases

def conv2d(x, W):
    """Creates a 2D convolutional layer with stride 2.
    
    Returns the created layer.
    """
    return tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
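
With 'SAME' padding, each stride-2 convolution or pooling step halves the spatial dimensions (rounding up). A quick sketch of how a 128x128 input shrinks through the four stride-2 stages built in the next cell:

size = 128  # SEGMENT_SIZE
for layer in ('conv1', 'mp1', 'conv2', 'mp2'):
    size = (size + 1) // 2  # ceil(size / 2) under 'SAME' padding
    print(layer, '->', size)  # 64, 32, 16, 8
# Hence the flattened fc1 input below has (128 // 2**4)**2 * 64 = 8 * 8 * 64 features.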

In [9]:
# Make sure we don't count losses multiple times by resetting the graph
tf.reset_default_graph()


x = tf.placeholder(tf.float32, shape=[None, SEGMENT_SIZE, SEGMENT_SIZE, 1], name='input')
y_ = tf.placeholder(tf.float32, shape=[None, 2], name='ground_truth')

with tf.name_scope('conv1') as scope:
    W_conv1 = weight_variable([5, 5, 1, 32], 1e-4)
    b_conv1 = bias_variable([32], 0.1)
    h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)

with tf.name_scope('mp1') as scope:
    h_pool1 = max_pool_2x2(h_conv1)

with tf.name_scope('conv2') as scope:
    W_conv2 = weight_variable([5, 5, 32, 64], 1e-4)
    b_conv2 = bias_variable([64], 0.1)
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

# Four stride-2 stages (conv1, mp1, conv2, mp2) each halve the spatial size
n = SEGMENT_SIZE // (2 ** 4)
    
with tf.name_scope('mp2') as scope:
    h_pool2 = max_pool_2x2(h_conv2)
    h_pool2_flat = tf.reshape(h_pool2, [-1, n * n * 64])

with tf.name_scope('fc1') as scope:
    W_fc1 = weight_variable([n * n * 64, 512], 0.04)
    b_fc1 = bias_variable([512], 0.1)
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# For some reason the Android version of TF doesn't like dropout
#with tf.name_scope('fc1_dropout'):
#    pl_keep_prob = tf.placeholder(tf.float32, name='keep_prob')
#    h_fc1_drop = tf.nn.dropout(h_fc1, pl_keep_prob)
pl_keep_prob = 'foo'  # Dummy stand-in; fill_feed_dict ignores it while dropout is disabled

with tf.name_scope('fc2') as scope:
    W_fc2 = weight_variable([512, 2], 0.1)
    b_fc2 = bias_variable([2], 0.1)
    o_fc2 = tf.matmul(h_fc1, W_fc2) + b_fc2

y_conv = tf.nn.softmax(o_fc2, name='output')

with tf.name_scope('cross_entropy_mean'):
    # Clip the softmax output to avoid log(0) when a prediction saturates
    cross_entropy_mean = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1)))
tf.add_to_collection('losses', cross_entropy_mean)
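
The clip matters when the softmax saturates; a minimal NumPy illustration with made-up predictions:

import numpy as np

probs = np.array([[1.0, 0.0]])  # fully confident, and wrong
truth = np.array([[0.0, 1.0]])
# Without the clip this would be -log(0) = inf; clipped, the loss stays finite (~11.5)
print(-np.mean(truth * np.log(np.clip(probs, 1e-10, 1))))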

In [10]:
with tf.name_scope('loss'):
    loss = tf.add_n(tf.get_collection('losses'))

pl_learning_rate = tf.placeholder(tf.float32, shape=[])

train_step = tf.train.MomentumOptimizer(pl_learning_rate, momentum=0.9).minimize(loss)
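
For reference, a plain-Python sketch of the update rule tf.train.MomentumOptimizer applies (accum = momentum * accum + grad; var -= lr * accum); the gradient values here are made up:

momentum, lr = 0.9, 0.003
accum, var = 0.0, 1.0
for grad in (0.5, 0.4, 0.3):
    accum = momentum * accum + grad  # velocity builds up along consistent gradients
    var -= lr * accum
    print(var)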

In [11]:
with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
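
A tiny NumPy illustration of the same accuracy computation, with made-up predictions:

import numpy as np

preds = np.array([[0.9, 0.1], [0.3, 0.7], [0.6, 0.4]])  # softmax outputs
truth = np.array([[1, 0], [0, 1], [0, 1]])              # one-hot labels
correct = np.argmax(preds, 1) == np.argmax(truth, 1)
print(correct.mean())  # 2 of 3 correct -> 0.666...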

In [12]:
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
writer = tf.train.SummaryWriter('./training_logs/', sess.graph_def)
sess.run(tf.initialize_all_variables())

# Training parameters
test_interval = 600
test_iters = 300
display_interval = 50 # Affects how often the training loss is printed
n_iters = test_interval * 60 # The total number of training steps
batch_size = 100

# Learning rate parameters
learning_rate = 0.003
step_size = test_interval * 30
lr_step_rate = 0.3

# These are plotted later to get feedback on how the training went
all_validation_accuracies = []
all_validation_losses = []
all_training_losses = []

for i in range(n_iters):
    if i % test_interval == 0:
        validation_accuracies = []
        validation_losses = []
        
        for j in range(test_iters):
            feed_dict = fill_feed_dict(x, y_, pl_learning_rate, pl_keep_prob, batch_size, 0, 1)
            validation_accuracy, validation_loss = sess.run([accuracy, loss], feed_dict=feed_dict)
        
            validation_accuracies.append(validation_accuracy)
            validation_losses.append(validation_loss)
        
        mean_validation_accuracy = np.mean(validation_accuracies)
        mean_validation_loss = np.mean(validation_losses)
        
        print('Validation accuracy: ' + str(mean_validation_accuracy))
        print('Validation loss: ' + str(mean_validation_loss))
        
        all_validation_accuracies.append(mean_validation_accuracy)
        all_validation_losses.append(mean_validation_loss)
        
    feed_dict = fill_feed_dict(x, y_, pl_learning_rate, pl_keep_prob, batch_size, learning_rate, 0.5)
    _, training_loss = sess.run([train_step, loss], feed_dict=feed_dict)
    
    if i % display_interval == 0:
        print('Training loss: ' + str(training_loss))
        print('========')
        
        all_training_losses.append(training_loss)
        
    if i % step_size == 0 and i != 0:
        learning_rate = learning_rate * lr_step_rate
        print('Updated learning rate: ' + str(learning_rate))
        print('========')


Validation accuracy: 0.674867
Validation loss: 4.25026
Training loss: 4.26514
========
Training loss: 4.23812
========
Training loss: 4.20271
========
Training loss: 4.16639
========
Training loss: 4.12948
========
Training loss: 4.0972
========
Training loss: 4.05652
========
Training loss: 4.02717
========
Training loss: 3.99817
========
Training loss: 3.96037
========
Training loss: 3.93933
========
Training loss: 3.92024
========
Validation accuracy: 0.555467
Validation loss: 3.87302
Training loss: 3.8672
========
Training loss: 3.82508
========
Training loss: 3.79078
========
Training loss: 3.76872
========
Training loss: 3.72565
========
Training loss: 3.68626
========
Training loss: 3.66105
========
Training loss: 3.63561
========
Training loss: 3.6431
========
Training loss: 3.58674
========
Training loss: 3.53453
========
Training loss: 3.51864
========
Validation accuracy: 0.631033
Validation loss: 3.48311
Training loss: 3.46062
========
Training loss: 3.45905
========
Training loss: 3.43048
========
Training loss: 3.40157
========
Training loss: 3.37407
========
Training loss: 3.32951
========
Training loss: 3.32669
========
Training loss: 3.28756
========
Training loss: 3.2702
========
Training loss: 3.25919
========
Training loss: 3.17799
========
Training loss: 3.17726
========
Validation accuracy: 0.616933
Validation loss: 3.17015
Training loss: 3.14579
========
Training loss: 3.13469
========
Training loss: 3.10199
========
Training loss: 3.09321
========
Training loss: 3.09776
========
Training loss: 3.01793
========
Training loss: 2.99315
========
Training loss: 2.96446
========
Training loss: 2.93154
========
Training loss: 2.95104
========
Training loss: 2.90048
========
Training loss: 2.89325
========
Validation accuracy: 0.638567
Validation loss: 2.8715
Training loss: 2.8563
========
Training loss: 2.86229
========
Training loss: 2.80077
========
Training loss: 2.807
========
Training loss: 2.77767
========
Training loss: 2.73214
========
Training loss: 2.74718
========
Training loss: 2.72263
========
Training loss: 2.67422
========
Training loss: 2.65161
========
Training loss: 2.64988
========
Training loss: 2.6543
========
Validation accuracy: 0.6515
Validation loss: 2.60095
Training loss: 2.58959
========
Training loss: 2.58222
========
Training loss: 2.547
========
Training loss: 2.5252
========
Training loss: 2.54485
========
Training loss: 2.50886
========
Training loss: 2.44984
========
Training loss: 2.47659
========
Training loss: 2.45299
========
Training loss: 2.41188
========
Training loss: 2.40056
========
Training loss: 2.35823
========
Validation accuracy: 0.645167
Validation loss: 2.36855
Training loss: 2.36663
========
Training loss: 2.35584
========
Training loss: 2.3383
========
Training loss: 2.30739
========
Training loss: 2.26054
========
Training loss: 2.30227
========
Training loss: 2.23614
========
Training loss: 2.24901
========
Training loss: 2.24644
========
Training loss: 2.16679
========
Training loss: 2.21379
========
Training loss: 2.1447
========
Validation accuracy: 0.678467
Validation loss: 2.14466
Training loss: 2.16176
========
Training loss: 2.11907
========
Training loss: 2.097
========
Training loss: 2.08903
========
Training loss: 2.08613
========
Training loss: 2.03347
========
Training loss: 2.03896
========
Training loss: 2.04787
========
Training loss: 2.03845
========
Training loss: 2.01121
========
Training loss: 2.01985
========
Training loss: 2.00762
========
Validation accuracy: 0.641633
Validation loss: 1.96992
Training loss: 1.95596
========
Training loss: 1.95611
========
Training loss: 1.92933
========
Training loss: 1.8786
========
Training loss: 1.90874
========
Training loss: 1.91621
========
Training loss: 1.88077
========
Training loss: 1.85851
========
Training loss: 1.85472
========
Training loss: 1.80923
========
Training loss: 1.81016
========
Training loss: 1.84665
========
Validation accuracy: 0.661567
Validation loss: 1.79546
Training loss: 1.7786
========
Training loss: 1.7742
========
Training loss: 1.76991
========
Training loss: 1.76595
========
Training loss: 1.7285
========
Training loss: 1.72674
========
Training loss: 1.69485
========
Training loss: 1.68993
========
Training loss: 1.72166
========
Training loss: 1.67308
========
Training loss: 1.67152
========
Training loss: 1.65522
========
Validation accuracy: 0.666433
Validation loss: 1.64269
Training loss: 1.61147
========
Training loss: 1.59661
========
Training loss: 1.59558
========
Training loss: 1.61146
========
Training loss: 1.6351
========
Training loss: 1.58348
========
Training loss: 1.57674
========
Training loss: 1.57475
========
Training loss: 1.54123
========
Training loss: 1.55413
========
Training loss: 1.50453
========
Training loss: 1.49032
========
Validation accuracy: 0.674333
Validation loss: 1.50105
Training loss: 1.49813
========
Training loss: 1.51213
========
Training loss: 1.46932
========
Training loss: 1.5234
========
Training loss: 1.45216
========
Training loss: 1.45955
========
Training loss: 1.4565
========
Training loss: 1.41783
========
Training loss: 1.41154
========
Training loss: 1.41975
========
Training loss: 1.40752
========
Training loss: 1.42113
========
Validation accuracy: 0.644833
Validation loss: 1.39561
Training loss: 1.37506
========
Training loss: 1.34956
========
Training loss: 1.348
========
Training loss: 1.38617
========
Training loss: 1.31389
========
Training loss: 1.33975
========
Training loss: 1.35435
========
Training loss: 1.29732
========
Training loss: 1.31019
========
Training loss: 1.29807
========
Training loss: 1.27165
========
Training loss: 1.26645
========
Validation accuracy: 0.667367
Validation loss: 1.27528
Training loss: 1.27354
========
Training loss: 1.25528
========
Training loss: 1.26879
========
Training loss: 1.22486
========
Training loss: 1.20633
========
Training loss: 1.2269
========
Training loss: 1.17731
========
Training loss: 1.21932
========
Training loss: 1.17253
========
Training loss: 1.18368
========
Training loss: 1.16839
========
Training loss: 1.14087
========
Validation accuracy: 0.6476
Validation loss: 1.18717
Training loss: 1.20178
========
Training loss: 1.17431
========
Training loss: 1.15562
========
Training loss: 1.13356
========
Training loss: 1.14977
========
Training loss: 1.12439
========
Training loss: 1.10833
========
Training loss: 1.06242
========
Training loss: 1.11126
========
Training loss: 1.07437
========
Training loss: 1.08
========
Training loss: 1.10332
========
Validation accuracy: 0.702033
Validation loss: 1.07448
Training loss: 1.05366
========
Training loss: 1.08033
========
Training loss: 1.11334
========
Training loss: 1.05141
========
Training loss: 1.02916
========
Training loss: 1.04614
========
Training loss: 1.01725
========
Training loss: 1.00529
========
Training loss: 0.993457
========
Training loss: 1.03014
========
Training loss: 1.0253
========
Training loss: 0.970105
========
Validation accuracy: 0.6295
Validation loss: 1.03158
Training loss: 0.996037
========
Training loss: 0.965808
========
Training loss: 0.997404
========
Training loss: 1.06588
========
Training loss: 0.942704
========
Training loss: 0.943225
========
Training loss: 0.958406
========
Training loss: 0.965719
========
Training loss: 0.970788
========
Training loss: 0.906713
========
Training loss: 0.909191
========
Training loss: 0.903681
========
Validation accuracy: 0.705433
Validation loss: 0.919681
Training loss: 0.882025
========
Training loss: 0.902369
========
Training loss: 0.935001
========
Training loss: 0.928891
========
Training loss: 0.905774
========
Training loss: 0.906355
========
Training loss: 0.911852
========
Training loss: 0.869219
========
Training loss: 0.88522
========
Training loss: 0.856069
========
Training loss: 0.85469
========
Training loss: 0.85584
========
Validation accuracy: 0.679667
Validation loss: 0.869914
Training loss: 0.835444
========
Training loss: 0.891346
========
Training loss: 0.844483
========
Training loss: 0.84388
========
Training loss: 0.857014
========
Training loss: 0.804156
========
Training loss: 0.814947
========
Training loss: 0.795454
========
Training loss: 0.813334
========
Training loss: 0.83428
========
Training loss: 0.848931
========
Training loss: 0.79487
========
Validation accuracy: 0.7149
Validation loss: 0.790164
Training loss: 0.762234
========
Training loss: 0.746101
========
Training loss: 0.757632
========
Training loss: 0.787042
========
Training loss: 0.748132
========
Training loss: 0.771254
========
Training loss: 0.763741
========
Training loss: 0.776661
========
Training loss: 0.741642
========
Training loss: 0.762061
========
Training loss: 0.776765
========
Training loss: 0.767417
========
Validation accuracy: 0.7304
Validation loss: 0.730282
Training loss: 0.758017
========
Training loss: 0.726283
========
Training loss: 0.711864
========
Training loss: 0.709159
========
Training loss: 0.732935
========
Training loss: 0.708268
========
Training loss: 0.69073
========
Training loss: 0.690316
========
Training loss: 0.738144
========
Training loss: 0.684596
========
Training loss: 0.705328
========
Training loss: 0.662748
========
Validation accuracy: 0.708633
Validation loss: 0.697412
Training loss: 0.705615
========
Training loss: 0.686316
========
Training loss: 0.73743
========
Training loss: 0.684003
========
Training loss: 0.672945
========
Training loss: 0.6845
========
Training loss: 0.70059
========
Training loss: 0.661678
========
Training loss: 0.64464
========
Training loss: 0.713032
========
Training loss: 0.638477
========
Training loss: 0.667013
========
Validation accuracy: 0.7127
Validation loss: 0.653005
Training loss: 0.661595
========
Training loss: 0.675832
========
Training loss: 0.62467
========
Training loss: 0.654897
========
Training loss: 0.672273
========
Training loss: 0.602683
========
Training loss: 0.674987
========
Training loss: 0.639588
========
Training loss: 0.610708
========
Training loss: 0.616528
========
Training loss: 0.603415
========
Training loss: 0.586389
========
Validation accuracy: 0.665733
Validation loss: 0.646594
Training loss: 0.581613
========
Training loss: 0.604466
========
Training loss: 0.623031
========
Training loss: 0.587981
========
Training loss: 0.599844
========
Training loss: 0.583927
========
Training loss: 0.592893
========
Training loss: 0.594002
========
Training loss: 0.609561
========
Training loss: 0.584454
========
Training loss: 0.641669
========
Training loss: 0.620685
========
Validation accuracy: 0.714633
Validation loss: 0.585279
Training loss: 0.585519
========
Training loss: 0.556172
========
Training loss: 0.591378
========
Training loss: 0.561806
========
Training loss: 0.506113
========
Training loss: 0.589362
========
Training loss: 0.554365
========
Training loss: 0.602482
========
Training loss: 0.582598
========
Training loss: 0.528105
========
Training loss: 0.534635
========
Training loss: 0.529299
========
Validation accuracy: 0.714367
Validation loss: 0.556336
Training loss: 0.524133
========
Training loss: 0.527995
========
Training loss: 0.525091
========
Training loss: 0.526155
========
Training loss: 0.534571
========
Training loss: 0.511987
========
Training loss: 0.511015
========
Training loss: 0.506658
========
Training loss: 0.533961
========
Training loss: 0.486943
========
Training loss: 0.514594
========
Training loss: 0.482604
========
Validation accuracy: 0.751367
Validation loss: 0.507632
Training loss: 0.507752
========
Training loss: 0.512241
========
Training loss: 0.54614
========
Training loss: 0.52793
========
Training loss: 0.469729
========
Training loss: 0.481044
========
Training loss: 0.462995
========
Training loss: 0.487933
========
Training loss: 0.505304
========
Training loss: 0.486866
========
Training loss: 0.503098
========
Training loss: 0.454075
========
Validation accuracy: 0.747533
Validation loss: 0.487338
Training loss: 0.501391
========
Training loss: 0.481414
========
Training loss: 0.469769
========
Training loss: 0.476335
========
Training loss: 0.466804
========
Training loss: 0.460861
========
Training loss: 0.446153
========
Training loss: 0.427924
========
Training loss: 0.476148
========
Training loss: 0.434371
========
Training loss: 0.464847
========
Training loss: 0.479797
========
Validation accuracy: 0.727333
Validation loss: 0.482996
Training loss: 0.427923
========
Training loss: 0.455973
========
Training loss: 0.477321
========
Training loss: 0.5034
========
Training loss: 0.462803
========
Training loss: 0.473092
========
Training loss: 0.42174
========
Training loss: 0.458452
========
Training loss: 0.410249
========
Training loss: 0.431718
========
Training loss: 0.455057
========
Training loss: 0.45174
========
Validation accuracy: 0.713933
Validation loss: 0.470563
Training loss: 0.464666
========
Training loss: 0.424263
========
Training loss: 0.453832
========
Training loss: 0.406136
========
Training loss: 0.407275
========
Training loss: 0.411264
========
Training loss: 0.429449
========
Training loss: 0.493981
========
Training loss: 0.392276
========
Training loss: 0.414562
========
Training loss: 0.44544
========
Training loss: 0.411303
========
Validation accuracy: 0.723833
Validation loss: 0.45028
Training loss: 0.449724
========
Updated learning rate: 0.0009
========
Training loss: 0.380835
========
Training loss: 0.426758
========
Training loss: 0.418629
========
Training loss: 0.434597
========
Training loss: 0.417663
========
Training loss: 0.376186
========
Training loss: 0.406528
========
Training loss: 0.421123
========
Training loss: 0.382269
========
Training loss: 0.411717
========
Training loss: 0.381094
========
Validation accuracy: 0.763333
Validation loss: 0.418719
Training loss: 0.386875
========
Training loss: 0.410595
========
Training loss: 0.440761
========
Training loss: 0.410389
========
Training loss: 0.402051
========
Training loss: 0.389789
========
Training loss: 0.367151
========
Training loss: 0.425091
========
Training loss: 0.390902
========
Training loss: 0.401195
========
Training loss: 0.401057
========
Training loss: 0.460402
========
Validation accuracy: 0.766733
Validation loss: 0.413662
Training loss: 0.425686
========
Training loss: 0.404891
========
Training loss: 0.393435
========
Training loss: 0.396969
========
Training loss: 0.377381
========
Training loss: 0.419374
========
Training loss: 0.403593
========
Training loss: 0.381529
========
Training loss: 0.41017
========
Training loss: 0.415792
========
Training loss: 0.405372
========
Training loss: 0.486064
========
Validation accuracy: 0.769033
Validation loss: 0.402468
Training loss: 0.375625
========
Training loss: 0.378968
========
Training loss: 0.421321
========
Training loss: 0.373028
========
Training loss: 0.368888
========
Training loss: 0.397317
========
Training loss: 0.366822
========
Training loss: 0.380661
========
Training loss: 0.38366
========
Training loss: 0.435613
========
Training loss: 0.386291
========
Training loss: 0.398567
========
Validation accuracy: 0.767133
Validation loss: 0.400517
Training loss: 0.374863
========
Training loss: 0.385019
========
Training loss: 0.368463
========
Training loss: 0.401145
========
Training loss: 0.360366
========
Training loss: 0.390209
========
Training loss: 0.380472
========
Training loss: 0.396535
========
Training loss: 0.398668
========
Training loss: 0.379255
========
Training loss: 0.386188
========
Training loss: 0.383284
========
Validation accuracy: 0.7533
Validation loss: 0.410069
Training loss: 0.381921
========
Training loss: 0.355225
========
Training loss: 0.37561
========
Training loss: 0.39778
========
Training loss: 0.394385
========
Training loss: 0.359636
========
Training loss: 0.358908
========
Training loss: 0.378059
========
Training loss: 0.363146
========
Training loss: 0.394075
========
Training loss: 0.37819
========
Training loss: 0.359737
========
Validation accuracy: 0.7591
Validation loss: 0.398263
Training loss: 0.367992
========
Training loss: 0.351214
========
Training loss: 0.392707
========
Training loss: 0.380686
========
Training loss: 0.337651
========
Training loss: 0.401383
========
Training loss: 0.353768
========
Training loss: 0.368046
========
Training loss: 0.393396
========
Training loss: 0.417103
========
Training loss: 0.352483
========
Training loss: 0.383174
========
Validation accuracy: 0.761167
Validation loss: 0.395689
Training loss: 0.332661
========
Training loss: 0.335187
========
Training loss: 0.393397
========
Training loss: 0.403509
========
Training loss: 0.36022
========
Training loss: 0.340471
========
Training loss: 0.400508
========
Training loss: 0.408547
========
Training loss: 0.390907
========
Training loss: 0.341284
========
Training loss: 0.42169
========
Training loss: 0.411456
========
Validation accuracy: 0.7709
Validation loss: 0.383937
Training loss: 0.387547
========
Training loss: 0.356529
========
Training loss: 0.35274
========
Training loss: 0.379291
========
Training loss: 0.332417
========
Training loss: 0.364451
========
Training loss: 0.396625
========
Training loss: 0.390335
========
Training loss: 0.376923
========
Training loss: 0.389717
========
Training loss: 0.357935
========
Training loss: 0.386733
========
Validation accuracy: 0.751167
Validation loss: 0.397608
Training loss: 0.383708
========
Training loss: 0.342038
========
Training loss: 0.395951
========
Training loss: 0.34849
========
Training loss: 0.388927
========
Training loss: 0.363848
========
Training loss: 0.358541
========
Training loss: 0.378088
========
Training loss: 0.347291
========
Training loss: 0.324071
========
Training loss: 0.377868
========
Training loss: 0.337434
========
Validation accuracy: 0.759933
Validation loss: 0.388035
Training loss: 0.348121
========
Training loss: 0.341595
========
Training loss: 0.395674
========
Training loss: 0.34277
========
Training loss: 0.30856
========
Training loss: 0.392068
========
Training loss: 0.357285
========
Training loss: 0.375989
========
Training loss: 0.343408
========
Training loss: 0.357649
========
Training loss: 0.355186
========
Training loss: 0.387477
========
Validation accuracy: 0.7673
Validation loss: 0.379515
Training loss: 0.359664
========
Training loss: 0.419905
========
Training loss: 0.327934
========
Training loss: 0.304209
========
Training loss: 0.351909
========
Training loss: 0.340624
========
Training loss: 0.360489
========
Training loss: 0.316846
========
Training loss: 0.347728
========
Training loss: 0.369059
========
Training loss: 0.375122
========
Training loss: 0.389113
========
Validation accuracy: 0.742767
Validation loss: 0.394282
Training loss: 0.321496
========
Training loss: 0.373038
========
Training loss: 0.358688
========
Training loss: 0.384899
========
Training loss: 0.363383
========
Training loss: 0.380422
========
Training loss: 0.325839
========
Training loss: 0.35876
========
Training loss: 0.354417
========
Training loss: 0.353459
========
Training loss: 0.358095
========
Training loss: 0.352884
========
Validation accuracy: 0.7551
Validation loss: 0.381455
Training loss: 0.377528
========
Training loss: 0.346446
========
Training loss: 0.319238
========
Training loss: 0.312463
========
Training loss: 0.31116
========
Training loss: 0.333696
========
Training loss: 0.325622
========
Training loss: 0.349865
========
Training loss: 0.338235
========
Training loss: 0.308241
========
Training loss: 0.351423
========
Training loss: 0.335335
========
Validation accuracy: 0.772867
Validation loss: 0.365974
Training loss: 0.334699
========
Training loss: 0.3461
========
Training loss: 0.309125
========
Training loss: 0.324691
========
Training loss: 0.332608
========
Training loss: 0.340062
========
Training loss: 0.321509
========
Training loss: 0.347721
========
Training loss: 0.330984
========
Training loss: 0.355117
========
Training loss: 0.311557
========
Training loss: 0.352691
========
Validation accuracy: 0.7581
Validation loss: 0.374817
Training loss: 0.313769
========
Training loss: 0.361007
========
Training loss: 0.333882
========
Training loss: 0.358383
========
Training loss: 0.34032
========
Training loss: 0.329495
========
Training loss: 0.332489
========
Training loss: 0.363967
========
Training loss: 0.336582
========
Training loss: 0.330455
========
Training loss: 0.343022
========
Training loss: 0.371235
========
Validation accuracy: 0.7266
Validation loss: 0.396991
Training loss: 0.366501
========
Training loss: 0.308373
========
Training loss: 0.303473
========
Training loss: 0.34489
========
Training loss: 0.291335
========
Training loss: 0.307115
========
Training loss: 0.337249
========
Training loss: 0.363143
========
Training loss: 0.312301
========
Training loss: 0.299033
========
Training loss: 0.3503
========
Training loss: 0.344659
========
Validation accuracy: 0.762267
Validation loss: 0.36501
Training loss: 0.285413
========
Training loss: 0.321728
========
Training loss: 0.340368
========
Training loss: 0.358096
========
Training loss: 0.342653
========
Training loss: 0.35611
========
Training loss: 0.290148
========
Training loss: 0.328714
========
Training loss: 0.335892
========
Training loss: 0.3215
========
Training loss: 0.324077
========
Training loss: 0.299873
========
Validation accuracy: 0.7773
Validation loss: 0.35203
Training loss: 0.306626
========
Training loss: 0.317637
========
Training loss: 0.354085
========
Training loss: 0.350836
========
Training loss: 0.326751
========
Training loss: 0.306643
========
Training loss: 0.358518
========
Training loss: 0.360106
========
Training loss: 0.298589
========
Training loss: 0.380429
========
Training loss: 0.32499
========
Training loss: 0.360378
========
Validation accuracy: 0.745667
Validation loss: 0.375979
Training loss: 0.329333
========
Training loss: 0.354572
========
Training loss: 0.323348
========
Training loss: 0.299364
========
Training loss: 0.326478
========
Training loss: 0.331536
========
Training loss: 0.344826
========
Training loss: 0.310334
========
Training loss: 0.303177
========
Training loss: 0.346516
========
Training loss: 0.327557
========
Training loss: 0.345155
========
Validation accuracy: 0.754833
Validation loss: 0.365322
Training loss: 0.314821
========
Training loss: 0.328375
========
Training loss: 0.348606
========
Training loss: 0.305828
========
Training loss: 0.30458
========
Training loss: 0.32072
========
Training loss: 0.321107
========
Training loss: 0.318884
========
Training loss: 0.315101
========
Training loss: 0.293412
========
Training loss: 0.316338
========
Training loss: 0.287555
========
Validation accuracy: 0.756967
Validation loss: 0.357841
Training loss: 0.298115
========
Training loss: 0.288611
========
Training loss: 0.382993
========
Training loss: 0.304637
========
Training loss: 0.286016
========
Training loss: 0.321946
========
Training loss: 0.304003
========
Training loss: 0.293386
========
Training loss: 0.300223
========
Training loss: 0.312128
========
Training loss: 0.296133
========
Training loss: 0.315302
========
Validation accuracy: 0.783333
Validation loss: 0.339463
Training loss: 0.321373
========
Training loss: 0.334135
========
Training loss: 0.305629
========
Training loss: 0.339934
========
Training loss: 0.369568
========
Training loss: 0.289066
========
Training loss: 0.337048
========
Training loss: 0.2863
========
Training loss: 0.293952
========
Training loss: 0.373776
========
Training loss: 0.302961
========
Training loss: 0.341872
========
Validation accuracy: 0.785033
Validation loss: 0.335976
Training loss: 0.321054
========
Training loss: 0.330669
========
Training loss: 0.296366
========
Training loss: 0.309105
========
Training loss: 0.355657
========
Training loss: 0.331608
========
Training loss: 0.277414
========
Training loss: 0.292766
========
Training loss: 0.297577
========
Training loss: 0.313843
========
Training loss: 0.281165
========
Training loss: 0.325967
========
Validation accuracy: 0.7572
Validation loss: 0.355968
Training loss: 0.298664
========
Training loss: 0.308369
========
Training loss: 0.268283
========
Training loss: 0.302582
========
Training loss: 0.360426
========
Training loss: 0.319947
========
Training loss: 0.300042
========
Training loss: 0.276079
========
Training loss: 0.308637
========
Training loss: 0.303526
========
Training loss: 0.33352
========
Training loss: 0.280015
========
Validation accuracy: 0.7327
Validation loss: 0.379255
Training loss: 0.344382
========
Training loss: 0.323884
========
Training loss: 0.279459
========
Training loss: 0.277249
========
Training loss: 0.278077
========
Training loss: 0.334971
========
Training loss: 0.325975
========
Training loss: 0.328345
========
Training loss: 0.288742
========
Training loss: 0.293112
========
Training loss: 0.336117
========
Training loss: 0.29925
========
Validation accuracy: 0.760533
Validation loss: 0.350428
Training loss: 0.337581
========
Training loss: 0.30025
========
Training loss: 0.323213
========
Training loss: 0.287602
========
Training loss: 0.274217
========
Training loss: 0.291308
========
Training loss: 0.261304
========
Training loss: 0.322754
========
Training loss: 0.303968
========
Training loss: 0.277105
========
Training loss: 0.329453
========
Training loss: 0.261057
========
Validation accuracy: 0.7596
Validation loss: 0.350117
Training loss: 0.315532
========
Training loss: 0.286793
========
Training loss: 0.311395
========
Training loss: 0.3231
========
Training loss: 0.299965
========
Training loss: 0.253481
========
Training loss: 0.279555
========
Training loss: 0.311136
========
Training loss: 0.294421
========
Training loss: 0.3005
========
Training loss: 0.290539
========
Training loss: 0.297678
========
Validation accuracy: 0.755867
Validation loss: 0.354284
Training loss: 0.261769
========
Training loss: 0.358319
========
Training loss: 0.271832
========
Training loss: 0.287474
========
Training loss: 0.300248
========
Training loss: 0.251304
========
Training loss: 0.288813
========
Training loss: 0.279518
========
Training loss: 0.288776
========
Training loss: 0.307867
========
Training loss: 0.272189
========
Training loss: 0.293432
========
Validation accuracy: 0.765467
Validation loss: 0.342979
Training loss: 0.26421
========
Training loss: 0.299304
========
Training loss: 0.270905
========
Training loss: 0.23359
========
Training loss: 0.283522
========
Training loss: 0.271357
========
Training loss: 0.319055
========
Training loss: 0.272048
========
Training loss: 0.304179
========
Training loss: 0.300908
========
Training loss: 0.315459
========
Training loss: 0.269084
========
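
The learning-rate schedule above is a simple step decay: every step_size iterations the rate is multiplied by lr_step_rate. A standalone sketch with the values used above:

learning_rate = 0.003
step_size = 600 * 30                     # test_interval * 30
n_iters = 600 * 60                       # test_interval * 60
n_decays = (n_iters - 1) // step_size    # one decay, at iteration 18000
print(learning_rate * 0.3 ** n_decays)   # 0.0009, matching the log above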

In [13]:
from matplotlib import pyplot as plt
import matplotlib as mpl

plt.style.use('bmh')
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['figure.figsize'] = [10, 7]

completed_iters = display_interval * len(all_training_losses)

ind_training = range(0, completed_iters, display_interval)
plt.plot(ind_training, all_training_losses, c='#7fcdbb', label='Training loss')

ind_validation = range(0, completed_iters, test_interval)
plt.plot(ind_validation, all_validation_losses, c='#2c7fb8', label='Validation loss')
plt.plot(ind_validation, all_validation_accuracies, c='#ff851b', label='Validation accuracy')

plt.xlabel('Training steps')
plt.legend()

plt.show()


Save and freeze the created graph


In [14]:
saver = tf.train.Saver()
# global_step=0 makes the checkpoint file './saved_checkpoint-0', which the
# freeze_graph call below points at
saver.save(sess, './saved_checkpoint', 0, 'checkpoint_state')

tf.train.write_graph(sess.graph.as_graph_def(), '.', 'input_graph.pb')

In [15]:
from freeze_graph import freeze_graph

# Positional arguments, in the order TensorFlow's freeze_graph.py script expects:
# input_graph, input_saver, input_binary, input_checkpoint, output_node_names,
# restore_op_name, filename_tensor_name, output_graph, clear_devices, initializer_nodes
freeze_graph('./input_graph.pb', '', False, './saved_checkpoint-0',
             'output', 'save/restore_all',
             'save/Const:0', '../second-sight/assets/tensorflow_text_detector.pb',
             False, None)


Converted 8 variables to const ops.
33 ops in the final graph.

In [ ]:
%%bash

cd ..

bazel mobile-install //second-sight:second-sight --start_app